--- title: Feature keywords: fastai sidebar: home_sidebar summary: "Module for working with local features: keypoints, detectors, matches, etc." description: "Module for working with local features: keypoints, detectors, matches, etc." nb_path: "nbs/feature.ipynb" ---
Let's detect ORB keypoints and convert them to and from OpenCV
%matplotlib inline
import matplotlib.pyplot as plt
# Read the demo image; OpenCV loads BGR, so convert to RGB for matplotlib.
# NOTE(review): cv2, torch, kornia (as K) and the kornia_moons helpers are
# expected to come from an earlier setup cell -- not visible here.
img = cv2.cvtColor(cv2.imread('data/strahov.png'), cv2.COLOR_BGR2RGB)
# ORB detector limited to at most 500 keypoints.
det = cv2.ORB_create(500)
kps, descs = det.detectAndCompute(img, None)
# Rich keypoints: draw size and orientation, not just location.
out_img = cv2.drawKeypoints(img, kps, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.imshow(out_img)
# OpenCV keypoints -> kornia LAFs with mrSize=1.0; with_resp=True also
# returns the keypoint responses r, needed for the round-trip below.
lafs, r = laf_from_opencv_kpts(kps, 1.0, with_resp=True)
visualize_LAF(K.image_to_tensor(img, False), lafs, 0, 'y', figsize=(8,6))
# Round-trip: LAFs (+responses) back to OpenCV keypoints with the same mrSize.
kps_back = opencv_kpts_from_laf(lafs, 1.0, r)
out_img = cv2.drawKeypoints(img, kps_back, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.imshow(out_img)
OpenCV uses different conventions for the local feature scale.
E.g. to get an equivalent kornia LAF from ORB keypoints, one should use mrSize = 0.5, while for SIFT -- 6.0. The orientation convention also differs between kornia and OpenCV.
%matplotlib inline
import cv2
import matplotlib.pyplot as plt
# Same image as before, now with up to 500 SIFT keypoints.
img = cv2.cvtColor(cv2.imread('data/strahov.png'), cv2.COLOR_BGR2RGB)
det = cv2.SIFT_create(500)
kps, descs = det.detectAndCompute(img, None)
# Draw the keypoints using OpenCV's own size/orientation convention.
out_img = cv2.drawKeypoints(img, kps, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.imshow(out_img)
The keypoints look small because, unlike for ORB, OpenCV draws for SIFT not the actual region to be described, but the radius of the detected blob. Kornia and kornia_moons, unlike OpenCV, show the real description region.
# SIFT-specific helper; applies the SIFT mrSize convention internally,
# so no explicit mrSize argument is needed here.
lafs, r = laf_from_opencv_SIFT_kpts(kps, with_resp=True)
visualize_LAF(K.image_to_tensor(img, False), lafs, 0, 'y', figsize=(8,6))
If you want to see an image similar to the OpenCV one, you can scale the LAFs by a factor of 1/6.
visualize_LAF(K.image_to_tensor(img, False), K.feature.laf.scale_laf(lafs, 1./6.0), 0, 'y', figsize=(8,6))
Now let's do the same for matches format
from torch import allclose
# Self-match the descriptors (query == database) with nearest-neighbor search.
match_dists, match_idxs = K.feature.match_nn(torch.from_numpy(descs).float(),
torch.from_numpy(descs).float())
# kornia (dists, idxs) tensors -> list of cv2.DMatch for OpenCV drawing.
cv2_matches = cv2_matches_from_kornia(match_dists, match_idxs)
out_img = cv2.drawMatches(img, kps, img, kps, cv2_matches, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plt.figure(figsize=(10,5))
plt.imshow(out_img)
# Round-trip back to kornia tensors and verify nothing was lost.
match_dists_back, match_idxs_back = kornia_matches_from_cv2(cv2_matches)
assert(allclose(match_dists_back, match_dists))
assert(allclose(match_idxs_back, match_idxs))
# to_numpy_image loads an image file straight into a numpy array.
assert isinstance(to_numpy_image('data/strahov.png'), np.ndarray)
We will visualize ORB features (blue), tentative matches (yellow) and inliers (greenish).
import numpy as np
# 100 ORB keypoints on a grayscale read of the same image, "matched" to itself.
det = cv2.ORB_create(100)
img1_fname = 'data/strahov.png'
kps1, descs1 = det.detectAndCompute(cv2.imread(img1_fname,0), None)
lafs1 = laf_from_opencv_ORB_kpts(kps1)
# Identity correspondences for the first 50 features: i <-> i.
idxs = torch.stack([torch.arange(50),torch.arange(50)], dim=-1)
# Every other match is flagged as an inlier, so all three draw_dict colors
# (inlier / tentative / feature) appear in the output.
draw_LAF_matches(lafs1, lafs1, idxs,
img1_fname,img1_fname,
[True if i%2 == 0 else False for i in range(len(idxs))],
draw_dict={"inlier_color": (0.2, 1, 0.2),
"tentative_color": (0.8, 0.8, 0),
"feature_color": (0.2, 0.5, 1),
"vertical": False})
Now let's try with epipolar matrix for the translation. Inliers should lie on the horizontal epipolar lines
# Fundamental matrix of a pure translation: its epipolar lines are
# horizontal, so inlier matches should lie along horizontal lines.
Fmat = np.array([[0., 0., 0.],
[0, 0, -1],
[0, 1, 0]])
# Same identity matches as above, now drawn with epipolar geometry (Fm=Fmat)
# and the two images stacked vertically.
draw_LAF_matches(lafs1, lafs1, idxs,
img1_fname,img1_fname,
[True if i%2 == 0 else False for i in range(len(idxs))],
draw_dict={"inlier_color": (0.2, 1, 0.2),
"tentative_color": (0.8, 0.8, 0),
"feature_color": (0.2, 0.5, 1),
"vertical": True}, Fm = Fmat)
Now we will transform the image, match it, find the homography and visualize it.
import numpy as np
det = cv2.SIFT_create(100)
img1_fname = 'data/strahov.png'
img1 = cv2.cvtColor(cv2.imread(img1_fname), cv2.COLOR_BGR2RGB)
# Ground-truth homography used to synthesize the second view from img1.
Hgt = np.array([[0.5, 0.1, 10],
[-0.1, 0.5, 10],
[0, 0, 1]])
# warpPerspective expects (width, height), hence the [::-1] on shape[:2].
img2 = cv2.warpPerspective(img1, Hgt, img1.shape[:2][::-1], borderValue=(255,255,255))
kps1, descs1 = det.detectAndCompute(img1, None)
lafs1 = laf_from_opencv_SIFT_kpts(kps1)
kps2, descs2 = det.detectAndCompute(img2, None)
lafs2 = laf_from_opencv_SIFT_kpts(kps2)
# Second-nearest-neighbor ratio-test matching with a loose 0.98 threshold.
match_dists, match_idxs = K.feature.match_snn(torch.from_numpy(descs1).float(),
torch.from_numpy(descs2).float(), 0.98)
# Estimate the homography with MAGSAC++ (0.5 px threshold) from the
# matched LAF centers; mask marks the RANSAC inliers.
H, mask = cv2.findHomography(KF.get_laf_center(lafs1[:,match_idxs[:,0]]).detach().cpu().numpy().reshape(-1,2),
KF.get_laf_center(lafs2[:,match_idxs[:,1]]).detach().cpu().numpy().reshape(-1,2),
cv2.USAC_MAGSAC, 0.5)
# Passing H lets the visualization relate the two views geometrically.
draw_LAF_matches(lafs1, lafs2, match_idxs,
img1, img2,
mask,
draw_dict={"inlier_color": (0.2, 1, 0.2),
"tentative_color": (0.8, 0.8, 0),
"feature_color": None,
"vertical": False}, H = H)
And the same with fundamental matrix
import numpy as np
det = cv2.SIFT_create(75)
img1_fname = 'data/strahov.png'
img1 = cv2.cvtColor(cv2.imread(img1_fname), cv2.COLOR_BGR2RGB)
# Synthesize the second view with a known homography, as in the demo above.
Hgt = np.array([[0.75, -0.1, 10],
[0.1, 0.75, 10],
[0, 0, 1]])
img2 = cv2.warpPerspective(img1, Hgt, img1.shape[:2][::-1], borderValue=(255,255,255))
kps1, descs1 = det.detectAndCompute(img1, None)
lafs1 = laf_from_opencv_SIFT_kpts(kps1)
kps2, descs2 = det.detectAndCompute(img2, None)
lafs2 = laf_from_opencv_SIFT_kpts(kps2)
# Ratio-test matching, slightly stricter threshold (0.95) this time.
match_dists, match_idxs = K.feature.match_snn(torch.from_numpy(descs1).float(),
torch.from_numpy(descs2).float(), 0.95)
# Same pipeline, but estimate a fundamental matrix instead of a homography.
Fmat, mask = cv2.findFundamentalMat(KF.get_laf_center(lafs1[:,match_idxs[:,0]]).detach().cpu().numpy().reshape(-1,2),
KF.get_laf_center(lafs2[:,match_idxs[:,1]]).detach().cpu().numpy().reshape(-1,2),
cv2.USAC_MAGSAC, 0.5)
# inlier_color is disabled here (None), so only tentative matches are colored.
draw_LAF_matches(lafs1, lafs2, match_idxs,
img1, img2,
mask,
draw_dict={"inlier_color": None,#(0.2, 1, 0.2),
"tentative_color": (0.8, 0.8, 0),
"feature_color": None,
"vertical": True})
!wget https://www.robots.ox.ac.uk/~vgg/research/affine/det_eval_files/graf.tar.gz
!tar -xzf graf.tar.gz
import numpy as np
det = cv2.SIFT_create(500)
img1_fname = 'img1.ppm'
img2_fname = 'img4.ppm'
img1 = cv2.cvtColor(cv2.imread(img1_fname), cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(cv2.imread(img2_fname), cv2.COLOR_BGR2RGB)
# Ground-truth homography between graf images 1 and 4 from the dataset.
Hgt = np.loadtxt('H1to4p')
# NOTE(review): this overwrites the real img4 loaded just above with a
# synthetic warp of img1 -- looks like a leftover; confirm which pair
# (real img4 vs. warped img1) is actually intended here.
img2 = cv2.warpPerspective(img1, Hgt, img1.shape[:2][::-1], borderValue=(255,255,255))
kps1, descs1 = det.detectAndCompute(img1, None)
lafs1 = laf_from_opencv_SIFT_kpts(kps1)
kps2, descs2 = det.detectAndCompute(img2, None)
lafs2 = laf_from_opencv_SIFT_kpts(kps2)
match_dists, match_idxs = K.feature.match_snn(torch.from_numpy(descs1).float(),
torch.from_numpy(descs2).float(), 0.98)
# MAGSAC++ homography from the matched LAF centers, as in earlier cells.
H, mask = cv2.findHomography(KF.get_laf_center(lafs1[:,match_idxs[:,0]]).detach().cpu().numpy().reshape(-1,2),
KF.get_laf_center(lafs2[:,match_idxs[:,1]]).detach().cpu().numpy().reshape(-1,2),
cv2.USAC_MAGSAC, 0.5)
# Draw inliers reprojected via H on grayscale-then-RGB copies of the images.
# (The misspelled "repjojected" is the function's actual exported name.)
draw_LAF_inliers_perspective_repjojected(lafs1, lafs2, match_idxs,
cv2.cvtColor(cv2.cvtColor(img1,cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB),
cv2.cvtColor(cv2.cvtColor(img2,cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB),
mask, H = H)
import matplotlib.pyplot as plt
# Wrap OpenCV SIFT to run on kornia tensors: detector-only and full
# detector+descriptor variants.
kornia_cv2dog = OpenCVDetectorKornia(cv2.SIFT_create(500))
kornia_cv2sift = OpenCVFeatureKornia(cv2.SIFT_create(500))
# HxWxC uint8 image -> 1xCxHxW float tensor scaled to [0, 1].
timg = K.image_to_tensor(cv2.cvtColor(cv2.imread('data/strahov.png'), cv2.COLOR_BGR2RGB), False).float()/255.
# Detector wrapper returns (lafs, responses); feature wrapper adds descriptors.
lafs, r = kornia_cv2dog(timg)
lafs2, r2, descs2 = kornia_cv2sift(timg)
visualize_LAF(timg, lafs, 0, 'y', figsize=(8,6))
import matplotlib.pyplot as plt
# OpenCV SIFT detector combined with AffNet affine-shape estimation.
# make_upright=True presumably discards the keypoint orientation -- TODO confirm.
kornia_cv2dogaffnet = OpenCVDetectorWithAffNetKornia(cv2.SIFT_create(500), make_upright=True)
timg = K.image_to_tensor(cv2.cvtColor(cv2.imread('data/strahov.png'), cv2.COLOR_BGR2RGB), False).float()/255.
lafs, r = kornia_cv2dogaffnet(timg)
visualize_LAF(timg, lafs, 0, 'y', figsize=(8,6))